tag  line  file  source code
vma  164  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma, unsigned long addr)
vma  173  arch/alpha/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  175  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  185  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  195  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  215  arch/alpha/kernel/ptrace.c  static void put_long(struct task_struct * tsk, struct vm_area_struct * vma,
vma  224  arch/alpha/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  226  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  236  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  246  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  251  arch/alpha/kernel/ptrace.c  do_wp_page(tsk, vma, addr, 1);
vma  259  arch/alpha/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  266  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma;
vma  269  arch/alpha/kernel/ptrace.c  vma = find_vma(tsk,addr);
vma  270  arch/alpha/kernel/ptrace.c  if (!vma)
vma  272  arch/alpha/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  273  arch/alpha/kernel/ptrace.c  return vma;
vma  274  arch/alpha/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  276  arch/alpha/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  278  arch/alpha/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  279  arch/alpha/kernel/ptrace.c  vma->vm_start = addr;
vma  280  arch/alpha/kernel/ptrace.c  return vma;
vma  290  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  293  arch/alpha/kernel/ptrace.c  if (!vma) {
vma  298  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  301  arch/alpha/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  302  arch/alpha/kernel/ptrace.c  vma_high = vma->vm_next;
vma  303  arch/alpha/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  308  arch/alpha/kernel/ptrace.c  low = get_long(tsk, vma, addr);
vma  318  arch/alpha/kernel/ptrace.c  long l = get_long(tsk, vma, addr);
vma  333  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  335  arch/alpha/kernel/ptrace.c  if (!vma)
vma  339  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  341  arch/alpha/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  342  arch/alpha/kernel/ptrace.c  vma_high = vma->vm_next;
vma  343  arch/alpha/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  348  arch/alpha/kernel/ptrace.c  low  = get_long(tsk, vma, addr);
vma  354  arch/alpha/kernel/ptrace.c  put_long(tsk, vma, addr, low);
vma  357  arch/alpha/kernel/ptrace.c  put_long(tsk, vma, addr, data);
vma  60  arch/alpha/mm/fault.c  struct vm_area_struct * vma;
vma  62  arch/alpha/mm/fault.c  vma = find_vma(current, address);
vma  63  arch/alpha/mm/fault.c  if (!vma)
vma  65  arch/alpha/mm/fault.c  if (vma->vm_start <= address)
vma  67  arch/alpha/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  69  arch/alpha/mm/fault.c  if (expand_stack(vma, address))
vma  77  arch/alpha/mm/fault.c  if (!(vma->vm_flags & VM_EXEC))
vma  81  arch/alpha/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
vma  84  arch/alpha/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  88  arch/alpha/mm/fault.c  handle_mm_fault(vma, address, cause > 0);
vma  87  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma, unsigned long addr)
vma  95  arch/i386/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  97  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  107  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  117  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  137  arch/i386/kernel/ptrace.c  static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
vma  146  arch/i386/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  148  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  158  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  168  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  173  arch/i386/kernel/ptrace.c  do_wp_page(tsk, vma, addr, 1);
vma  181  arch/i386/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  187  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma;
vma  190  arch/i386/kernel/ptrace.c  vma = find_vma(tsk,addr);
vma  191  arch/i386/kernel/ptrace.c  if (!vma)
vma  193  arch/i386/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  194  arch/i386/kernel/ptrace.c  return vma;
vma  195  arch/i386/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  197  arch/i386/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  199  arch/i386/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  200  arch/i386/kernel/ptrace.c  vma->vm_start = addr;
vma  201  arch/i386/kernel/ptrace.c  return vma;
vma  211  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  213  arch/i386/kernel/ptrace.c  if (!vma)
vma  217  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  219  arch/i386/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  220  arch/i386/kernel/ptrace.c  vma_high = vma->vm_next;
vma  221  arch/i386/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  224  arch/i386/kernel/ptrace.c  low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
vma  242  arch/i386/kernel/ptrace.c  *result = get_long(tsk, vma, addr);
vma  253  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  255  arch/i386/kernel/ptrace.c  if (!vma)
vma  259  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  261  arch/i386/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  262  arch/i386/kernel/ptrace.c  vma_high = vma->vm_next;
vma  263  arch/i386/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  266  arch/i386/kernel/ptrace.c  low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
vma  291  arch/i386/kernel/ptrace.c  put_long(tsk, vma, addr & ~(sizeof(long)-1),low);
vma  294  arch/i386/kernel/ptrace.c  put_long(tsk, vma, addr, data);
vma  36  arch/i386/mm/fault.c  struct vm_area_struct * vma;
vma  42  arch/i386/mm/fault.c  vma = find_vma(current, address);
vma  43  arch/i386/mm/fault.c  if (!vma)
vma  45  arch/i386/mm/fault.c  if (vma->vm_start <= address)
vma  47  arch/i386/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  59  arch/i386/mm/fault.c  if (expand_stack(vma, address))
vma  70  arch/i386/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  76  arch/i386/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  92  arch/i386/mm/fault.c  do_wp_page(current, vma, address, error_code & 2);
vma  95  arch/i386/mm/fault.c  do_no_page(current, vma, address, error_code & 2);
vma  107  arch/m68k/kernel/ptrace.c  struct vm_area_struct * vma, unsigned long addr)
vma  115  arch/m68k/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  117  arch/m68k/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  127  arch/m68k/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  138  arch/m68k/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  158  arch/m68k/kernel/ptrace.c  static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
vma  167  arch/m68k/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  169  arch/m68k/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  179  arch/m68k/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  190  arch/m68k/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  195  arch/m68k/kernel/ptrace.c  do_wp_page(tsk, vma, addr, 2);
vma  204  arch/m68k/kernel/ptrace.c  *pgtable = pte_mkdirty(mk_pte(page, vma->vm_page_prot));
vma  210  arch/m68k/kernel/ptrace.c  struct vm_area_struct * vma;
vma  213  arch/m68k/kernel/ptrace.c  vma = find_vma(tsk,addr);
vma  214  arch/m68k/kernel/ptrace.c  if (!vma)
vma  216  arch/m68k/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  217  arch/m68k/kernel/ptrace.c  return vma;
vma  218  arch/m68k/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  220  arch/m68k/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  222  arch/m68k/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  223  arch/m68k/kernel/ptrace.c  vma->vm_start = addr;
vma  224  arch/m68k/kernel/ptrace.c  return vma;
vma  234  arch/m68k/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  236  arch/m68k/kernel/ptrace.c  if (!vma)
vma  240  arch/m68k/kernel/ptrace.c  struct vm_area_struct * vma_low = vma;
vma  242  arch/m68k/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  243  arch/m68k/kernel/ptrace.c  vma_low = vma->vm_next;
vma  244  arch/m68k/kernel/ptrace.c  if (!vma_low || vma_low->vm_start != vma->vm_end)
vma  247  arch/m68k/kernel/ptrace.c  high = get_long(tsk, vma,addr & ~(sizeof(long)-1));
vma  265  arch/m68k/kernel/ptrace.c  *result = get_long(tsk, vma,addr);
vma  276  arch/m68k/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  278  arch/m68k/kernel/ptrace.c  if (!vma)
vma  282  arch/m68k/kernel/ptrace.c  struct vm_area_struct * vma_low = vma;
vma  284  arch/m68k/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  285  arch/m68k/kernel/ptrace.c  vma_low = vma->vm_next;
vma  286  arch/m68k/kernel/ptrace.c  if (!vma_low || vma_low->vm_start != vma->vm_end)
vma  289  arch/m68k/kernel/ptrace.c  high = get_long(tsk, vma,addr & ~(sizeof(long)-1));
vma  314  arch/m68k/kernel/ptrace.c  put_long(tsk, vma,addr & ~(sizeof(long)-1),high);
vma  317  arch/m68k/kernel/ptrace.c  put_long(tsk, vma,addr,data);
vma  31  arch/m68k/mm/fault.c  struct vm_area_struct * vma;
vma  39  arch/m68k/mm/fault.c  vma = find_vma(current, address);
vma  40  arch/m68k/mm/fault.c  if (!vma)
vma  42  arch/m68k/mm/fault.c  if (vma->vm_start <= address)
vma  44  arch/m68k/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  54  arch/m68k/mm/fault.c  if (expand_stack(vma, address))
vma  66  arch/m68k/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  72  arch/m68k/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  76  arch/m68k/mm/fault.c  do_wp_page(current, vma, address, error_code & 2);
vma  79  arch/m68k/mm/fault.c  do_no_page(current, vma, address, error_code & 2);
vma  86  arch/mips/kernel/ptrace.c  static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr)
vma  93  arch/mips/kernel/ptrace.c  pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr);
vma  95  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 0);
vma  105  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 0);
vma  125  arch/mips/kernel/ptrace.c  static void put_long(struct vm_area_struct * vma, unsigned long addr,
vma  133  arch/mips/kernel/ptrace.c  pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr);
vma  135  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 1);
vma  145  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 1);
vma  150  arch/mips/kernel/ptrace.c  do_wp_page(vma, addr, 1);
vma  158  arch/mips/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  164  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma;
vma  167  arch/mips/kernel/ptrace.c  vma = find_vma(tsk, addr);
vma  168  arch/mips/kernel/ptrace.c  if (!vma)
vma  170  arch/mips/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  171  arch/mips/kernel/ptrace.c  return vma;
vma  172  arch/mips/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  174  arch/mips/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  176  arch/mips/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  177  arch/mips/kernel/ptrace.c  vma->vm_start = addr;
vma  178  arch/mips/kernel/ptrace.c  return vma;
vma  188  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  190  arch/mips/kernel/ptrace.c  if (!vma)
vma  194  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  196  arch/mips/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  197  arch/mips/kernel/ptrace.c  vma_high = vma->vm_next;
vma  198  arch/mips/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  201  arch/mips/kernel/ptrace.c  low = get_long(vma, addr & ~(sizeof(long)-1));
vma  219  arch/mips/kernel/ptrace.c  *result = get_long(vma, addr);
vma  230  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  232  arch/mips/kernel/ptrace.c  if (!vma)
vma  236  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  238  arch/mips/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  239  arch/mips/kernel/ptrace.c  vma_high = vma->vm_next;
vma  240  arch/mips/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  243  arch/mips/kernel/ptrace.c  low = get_long(vma, addr & ~(sizeof(long)-1));
vma  268  arch/mips/kernel/ptrace.c  put_long(vma, addr & ~(sizeof(long)-1),low);
vma  271  arch/mips/kernel/ptrace.c  put_long(vma, addr, data);
vma  36  arch/mips/kernel/sysmips.c  struct vm_area_struct * vma;
vma  38  arch/mips/kernel/sysmips.c  vma = find_vma(current, address);
vma  39  arch/mips/kernel/sysmips.c  if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ))
vma  41  arch/mips/kernel/sysmips.c  address = vma->vm_end - address;
vma  44  arch/mips/kernel/sysmips.c  if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end &&
vma  45  arch/mips/kernel/sysmips.c  (vma->vm_next->vm_flags & VM_READ))
vma  31  arch/mips/mm/fault.c  struct vm_area_struct * vma;
vma  38  arch/mips/mm/fault.c  vma = find_vma(current, address);
vma  39  arch/mips/mm/fault.c  if (!vma)
vma  41  arch/mips/mm/fault.c  if (vma->vm_start <= address)
vma  43  arch/mips/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  45  arch/mips/mm/fault.c  if (expand_stack(vma, address))
vma  53  arch/mips/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  56  arch/mips/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  59  arch/mips/mm/fault.c  handle_mm_fault(vma, address, writeaccess);
vma  185  arch/ppc/mm/fault.c  struct vm_area_struct * vma;
vma  190  arch/ppc/mm/fault.c  for (vma = current->mm->mmap ; ; vma = vma->vm_next)
vma  192  arch/ppc/mm/fault.c  if (!vma)
vma  198  arch/ppc/mm/fault.c  if (vma->vm_end > address)
vma  202  arch/ppc/mm/fault.c  vma = find_vma(current, address);
vma  203  arch/ppc/mm/fault.c  if (!vma)
vma  208  arch/ppc/mm/fault.c  if (vma->vm_start <= address){
vma  211  arch/ppc/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  217  arch/ppc/mm/fault.c  if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
vma  219  arch/ppc/mm/fault.c  printk("stack2: vma->vm_end-address %x rlim %x\n", vma->vm_end - address,
vma  221  arch/ppc/mm/fault.c  printk("stack2: vm_end %x address = %x\n", vma->vm_end,address);
vma  226  arch/ppc/mm/fault.c  vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
vma  227  arch/ppc/mm/fault.c  vma->vm_start = (address & PAGE_MASK);
vma  238  arch/ppc/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  242  arch/ppc/mm/fault.c  current,address,vma->vm_flags,current->mm,vma,vma->vm_start,vma->vm_end);
vma  252  arch/ppc/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  255  arch/ppc/mm/fault.c  _printk("vma = %x\n", vma);
vma  256  arch/ppc/mm/fault.c  _printk("vma->vm_flags = %x\n", vma->vm_flags);
vma  261  arch/ppc/mm/fault.c  vma, VM_READ,VM_EXEC);
vma  263  arch/ppc/mm/fault.c  vma->vm_start, vma->vm_end);
vma  266  arch/ppc/mm/fault.c  printk("vma->vm_flags = %x\n", vma->vm_flags);
vma  273  arch/ppc/mm/fault.c  handle_mm_fault(vma, address, error_code & 2);
vma  296  arch/ppc/mm/fault.c  current,address,vma->vm_flags,current->mm,vma,vma->vm_start,vma->vm_end);
vma  38  arch/sparc/kernel/ptrace.c  struct vm_area_struct * vma, unsigned long addr)
vma  46  arch/sparc/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  48  arch/sparc/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  58  arch/sparc/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  68  arch/sparc/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  90  arch/sparc/kernel/ptrace.c  static void put_long(struct task_struct * tsk, struct vm_area_struct * vma,
vma  99  arch/sparc/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  101  arch/sparc/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  111  arch/sparc/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  121  arch/sparc/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  126  arch/sparc/kernel/ptrace.c  do_wp_page(tsk, vma, addr, 1);
vma  130  arch/sparc/kernel/ptrace.c  flush_cache_page(vma, page);
vma  137  arch/sparc/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  138  arch/sparc/kernel/ptrace.c  flush_tlb_page(vma, page);
vma  144  arch/sparc/kernel/ptrace.c  struct vm_area_struct * vma;
vma  147  arch/sparc/kernel/ptrace.c  vma = find_vma(tsk,addr);
vma  148  arch/sparc/kernel/ptrace.c  if (!vma)
vma  150  arch/sparc/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  151  arch/sparc/kernel/ptrace.c  return vma;
vma  152  arch/sparc/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  154  arch/sparc/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  156  arch/sparc/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  157  arch/sparc/kernel/ptrace.c  vma->vm_start = addr;
vma  158  arch/sparc/kernel/ptrace.c  return vma;
vma  168  arch/sparc/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  170  arch/sparc/kernel/ptrace.c  if (!vma)
vma  172  arch/sparc/kernel/ptrace.c  *result = get_long(tsk, vma, addr);
vma  179  arch/sparc/kernel/ptrace.c  struct vm_area_struct *vma = find_extend_vma(tsk, addr&~3);
vma  182  arch/sparc/kernel/ptrace.c  if(!vma)
vma  184  arch/sparc/kernel/ptrace.c  tmp = get_long(tsk, vma, (addr & ~3));
vma  209  arch/sparc/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  211  arch/sparc/kernel/ptrace.c  if (!vma)
vma  213  arch/sparc/kernel/ptrace.c  put_long(tsk, vma, addr, data);
vma  220  arch/sparc/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, (addr & ~3));
vma  223  arch/sparc/kernel/ptrace.c  if (!vma)
vma  225  arch/sparc/kernel/ptrace.c  tmp = get_long(tsk, vma, (addr & ~3));
vma  244  arch/sparc/kernel/ptrace.c  put_long(tsk, vma, (addr & ~3), tmp);
vma  544  arch/sparc/kernel/ptrace.c  struct vm_area_struct *vma;
vma  552  arch/sparc/kernel/ptrace.c  vma = find_extend_vma(child, addr);
vma  553  arch/sparc/kernel/ptrace.c  if(vma && request == PTRACE_POKEDATA && (vma->vm_flags & VM_EXEC)) {
vma  557  arch/sparc/kernel/smp.c  void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma  558  arch/sparc/kernel/smp.c  { xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); }
vma  560  arch/sparc/kernel/smp.c  void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  561  arch/sparc/kernel/smp.c  { xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); }
vma  133  arch/sparc/mm/fault.c  struct vm_area_struct *vma;
vma  163  arch/sparc/mm/fault.c  vma = find_vma(current, address);
vma  164  arch/sparc/mm/fault.c  if(!vma)
vma  166  arch/sparc/mm/fault.c  if(vma->vm_start <= address)
vma  168  arch/sparc/mm/fault.c  if(!(vma->vm_flags & VM_GROWSDOWN))
vma  170  arch/sparc/mm/fault.c  if(expand_stack(vma, address))
vma  178  arch/sparc/mm/fault.c  if(!(vma->vm_flags & VM_WRITE))
vma  182  arch/sparc/mm/fault.c  if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  185  arch/sparc/mm/fault.c  handle_mm_fault(vma, address, write);
vma  217  arch/sparc/mm/fault.c  struct vm_area_struct *vma;
vma  219  arch/sparc/mm/fault.c  vma = find_vma(current, address);
vma  220  arch/sparc/mm/fault.c  if(!vma)
vma  222  arch/sparc/mm/fault.c  if(vma->vm_start <= address)
vma  224  arch/sparc/mm/fault.c  if(!(vma->vm_flags & VM_GROWSDOWN))
vma  226  arch/sparc/mm/fault.c  if(expand_stack(vma, address))
vma  230  arch/sparc/mm/fault.c  if(!(vma->vm_flags & VM_WRITE))
vma  233  arch/sparc/mm/fault.c  if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  235  arch/sparc/mm/fault.c  handle_mm_fault(vma, address, write);
vma  42  arch/sparc/mm/loadmmu.c  void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
vma  585  arch/sparc/mm/srmmu.c  static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma  588  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  662  arch/sparc/mm/srmmu.c  static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  665  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  722  arch/sparc/mm/srmmu.c  static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma  725  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  729  arch/sparc/mm/srmmu.c  if(vma->vm_flags & VM_EXEC)
vma  777  arch/sparc/mm/srmmu.c  static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  780  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  835  arch/sparc/mm/srmmu.c  static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma  838  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  984  arch/sparc/mm/srmmu.c  static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  987  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  1053  arch/sparc/mm/srmmu.c  static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  1056  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  1108  arch/sparc/mm/srmmu.c  static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma  1110  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  1209  arch/sparc/mm/srmmu.c  static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  1211  arch/sparc/mm/srmmu.c  struct mm_struct *mm = vma->vm_mm;
vma  1935  arch/sparc/mm/srmmu.c  static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
vma  568  arch/sparc/mm/sun4c.c  static void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma  1032  arch/sparc/mm/sun4c.c  static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma  1036  arch/sparc/mm/sun4c.c  struct mm_struct *mm = vma->vm_mm;
vma  1142  arch/sparc/mm/sun4c.c  static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma  1144  arch/sparc/mm/sun4c.c  struct mm_struct *mm = vma->vm_mm;
vma  189  drivers/char/fbmem.c  fb_mmap(struct inode *inode, struct file *file, struct vm_area_struct * vma)
vma  197  drivers/char/fbmem.c  if ((vma->vm_end - vma->vm_start + vma->vm_offset) > fix.smem_len)
vma  199  drivers/char/fbmem.c  vma->vm_offset += fix.smem_start;
vma  200  drivers/char/fbmem.c  if (vma->vm_offset & ~PAGE_MASK)
vma  203  drivers/char/fbmem.c  pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
vma  204  drivers/char/fbmem.c  if (remap_page_range(vma->vm_start, vma->vm_offset,
vma  205  drivers/char/fbmem.c  vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma  207  drivers/char/fbmem.c  vma->vm_inode = inode;
vma  99  drivers/char/mem.c  static int mmap_mem(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  101  drivers/char/mem.c  if (vma->vm_offset & ~PAGE_MASK)
vma  110  drivers/char/mem.c  if (x86 > 3 && vma->vm_offset >= high_memory)
vma  111  drivers/char/mem.c  pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
vma  113  drivers/char/mem.c  if (remap_page_range(vma->vm_start, vma->vm_offset, vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma  115  drivers/char/mem.c  vma->vm_inode = inode;
vma  183  drivers/char/mem.c  static int mmap_zero(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  185  drivers/char/mem.c  if (vma->vm_flags & VM_SHARED)
vma  187  drivers/char/mem.c  if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma  670  drivers/sbus/char/suncons.c  fb_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma)
vma  688  drivers/sbus/char/suncons.c  v = (*fb->mmap)(inode, file, vma, fb->base, fb);
vma  829  drivers/sbus/char/suncons.c  cg6_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx)
vma  835  drivers/sbus/char/suncons.c  size = vma->vm_end - vma->vm_start;
vma  836  drivers/sbus/char/suncons.c  if (vma->vm_offset & ~PAGE_MASK)
vma  840  drivers/sbus/char/suncons.c  vma->vm_flags |= FB_MMAP_VM_FLAGS;
vma  844  drivers/sbus/char/suncons.c  switch (vma->vm_offset+page){
vma  889  drivers/sbus/char/suncons.c  r = io_remap_page_range (vma->vm_start+page,
vma  891  drivers/sbus/char/suncons.c  map_size, vma->vm_page_prot,
vma  896  drivers/sbus/char/suncons.c  vma->vm_inode = inode;
vma  1082  drivers/sbus/char/suncons.c  cg3_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx)
vma  1088  drivers/sbus/char/suncons.c  size = vma->vm_end - vma->vm_start;
vma  1089  drivers/sbus/char/suncons.c  if (vma->vm_offset & ~PAGE_MASK)
vma  1093  drivers/sbus/char/suncons.c  vma->vm_flags |= FB_MMAP_VM_FLAGS;
vma  1097  drivers/sbus/char/suncons.c  switch (vma->vm_offset+page){
vma  1112  drivers/sbus/char/suncons.c  r = io_remap_page_range (vma->vm_start+page,
vma  1114  drivers/sbus/char/suncons.c  map_size, vma->vm_page_prot,
vma  1119  drivers/sbus/char/suncons.c  vma->vm_inode = inode;
vma  1160  drivers/sbus/char/suncons.c  bwtwo_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx)
vma  1166  drivers/sbus/char/suncons.c  map_size = size = vma->vm_end - vma->vm_start;
vma  1168  drivers/sbus/char/suncons.c  if (vma->vm_offset & ~PAGE_MASK)
vma  1172  drivers/sbus/char/suncons.c  vma->vm_flags |= FB_MMAP_VM_FLAGS;
vma  1175  drivers/sbus/char/suncons.c  (unsigned int) vma->vm_start, size,
vma  1176  drivers/sbus/char/suncons.c  (unsigned int) vma->vm_offset);
vma  1180  drivers/sbus/char/suncons.c  r = io_remap_page_range (vma->vm_start, map_offset, map_size, vma->vm_page_prot,
vma  1183  drivers/sbus/char/suncons.c  vma->vm_inode = inode;
vma  234  drivers/sound/soundcard.c  sound_mmap (inode_handle * inode, file_handle * file, vm_area_handle * vma)
vma  253  drivers/sound/soundcard.c  if ((vma_get_flags (vma) & (VM_READ | VM_WRITE)) == (VM_READ | VM_WRITE))
vma  259  drivers/sound/soundcard.c  if (vma_get_flags (vma) & VM_READ)
vma  263  drivers/sound/soundcard.c  else if (vma_get_flags (vma) & VM_WRITE)
vma  291  drivers/sound/soundcard.c  if (vma_get_offset (vma) != 0)
vma  297  drivers/sound/soundcard.c  size = vma_get_end (vma) - vma_get_start (vma);
vma  306  drivers/sound/soundcard.c  if (remap_page_range (vma_get_start (vma), dmap->raw_buf_phys,
vma  307  drivers/sound/soundcard.c  vma_get_end (vma) - vma_get_start (vma),
vma  308  drivers/sound/soundcard.c  vma_get_page_prot (vma)))
vma  311  drivers/sound/soundcard.c  vma_set_inode (vma, inode);
vma  842  fs/binfmt_elf.c  static inline int maydump(struct vm_area_struct *vma)
vma  844  fs/binfmt_elf.c  if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
vma  847  fs/binfmt_elf.c  if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
vma  849  fs/binfmt_elf.c  if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
vma  945  fs/binfmt_elf.c  struct vm_area_struct *vma;
vma  966  fs/binfmt_elf.c  for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
vma  967  fs/binfmt_elf.c  if (maydump(vma))
vma  969  fs/binfmt_elf.c  int sz = vma->vm_end-vma->vm_start;
vma  1151  fs/binfmt_elf.c  for(vma = current->mm->mmap, i = 0;
vma  1152  fs/binfmt_elf.c  i < segs && vma != NULL; vma = vma->vm_next) {
vma  1158  fs/binfmt_elf.c  sz = vma->vm_end - vma->vm_start;
vma  1162  fs/binfmt_elf.c  phdr.p_vaddr = vma->vm_start;
vma  1164  fs/binfmt_elf.c  phdr.p_filesz = maydump(vma) ? sz : 0;
vma  1167  fs/binfmt_elf.c  phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
vma  1168  fs/binfmt_elf.c  if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
vma  1169  fs/binfmt_elf.c  if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
vma  1183  fs/binfmt_elf.c  for(i = 0, vma = current->mm->mmap;
vma  1184  fs/binfmt_elf.c  i < segs && vma != NULL;
vma  1185  fs/binfmt_elf.c  vma = vma->vm_next) {
vma  1186  fs/binfmt_elf.c  unsigned long addr = vma->vm_start;
vma  1187  fs/binfmt_elf.c  unsigned long len = vma->vm_end - vma->vm_start;
vma  1189  fs/binfmt_elf.c  if (!maydump(vma))
vma  94  fs/fat/mmap.c  int fat_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  96  fs/fat/mmap.c  if (vma->vm_flags & VM_SHARED)  /* only PAGE_COW or read-only supported now */
vma  98  fs/fat/mmap.c  if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
vma  107  fs/fat/mmap.c  vma->vm_inode = inode;
vma  109  fs/fat/mmap.c  vma->vm_ops = &fat_file_mmap;
vma  253  fs/locks.c  struct vm_area_struct *vma = inode->i_mmap;
vma  255  fs/locks.c  if (vma->vm_flags & VM_MAYSHARE)
vma  257  fs/locks.c  vma = vma->vm_next_share;
vma  258  fs/locks.c  } while (vma != inode->i_mmap);
vma  31  fs/namei.c  struct vm_area_struct * vma;
vma  35  fs/namei.c  vma = find_vma(current, address);
vma  36  fs/namei.c  if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ))
vma  38  fs/namei.c  address = vma->vm_end - address;
vma  41  fs/namei.c  if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end &&
vma  42  fs/namei.c  (vma->vm_next->vm_flags & VM_READ))
vma  132  fs/ncpfs/mmap.c  ncp_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  142  fs/ncpfs/mmap.c  if (vma->vm_flags & VM_SHARED)
vma  151  fs/ncpfs/mmap.c  vma->vm_inode = inode;
vma  153  fs/ncpfs/mmap.c  vma->vm_ops = &ncp_file_mmap;
vma  94  fs/nfs/file.c  static int nfs_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  97  fs/nfs/file.c  return generic_file_mmap(inode, file, vma);
vma  552  fs/proc/array.c  struct vm_area_struct * vma = mm->mmap;
vma  556  fs/proc/array.c  for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma  557  fs/proc/array.c  unsigned long len = (vma->vm_end - vma->vm_start) >> 10;
vma  558  fs/proc/array.c  if (!vma->vm_inode) {
vma  560  fs/proc/array.c  if (vma->vm_flags & VM_GROWSDOWN)
vma  564  fs/proc/array.c  if (vma->vm_flags & VM_WRITE)
vma  566  fs/proc/array.c  if (vma->vm_flags & VM_EXEC) {
vma  568  fs/proc/array.c  if (vma->vm_flags & VM_EXECUTABLE)
vma  655  fs/proc/array.c  struct vm_area_struct *vma = tsk->mm->mmap;
vma  656  fs/proc/array.c  while (vma) {
vma  657  fs/proc/array.c  vsize += vma->vm_end - vma->vm_start;
vma  658  fs/proc/array.c  vma = vma->vm_next;
vma  816  fs/proc/array.c  struct vm_area_struct * vma = tsk->mm->mmap;
vma  818  fs/proc/array.c  while (vma) {
vma  819  fs/proc/array.c  pgd_t *pgd = pgd_offset(tsk->mm, vma->vm_start);
vma  822  fs/proc/array.c  statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
vma  827  fs/proc/array.c  if (vma->vm_flags & VM_EXECUTABLE)
vma  829  fs/proc/array.c  else if (vma->vm_flags & VM_GROWSDOWN)
vma  831  fs/proc/array.c  else if (vma->vm_end > 0x60000000)
vma  835  fs/proc/array.c  vma = vma->vm_next;
vma  105  fs/proc/link.c  struct vm_area_struct * vma;
vma  108  fs/proc/link.c  vma = p->mm->mmap;
vma  109  fs/proc/link.c  while (vma) {
vma  110  fs/proc/link.c  if (vma->vm_flags & VM_EXECUTABLE) {
vma  111  fs/proc/link.c  new_inode = vma->vm_inode;
vma  114  fs/proc/link.c  vma = vma->vm_next;
vma  28  fs/proc/mem.c  struct vm_area_struct *vma;
vma  31  fs/proc/mem.c  vma = find_vma(tsk, addr);
vma  32  fs/proc/mem.c  if (!vma)
vma  34  fs/proc/mem.c  if (vma->vm_start > addr)
vma  36  fs/proc/mem.c  if (!(vma->vm_flags & VM_READ))
vma  38  fs/proc/mem.c  while ((retval = vma->vm_end - addr) < count) {
vma  39  fs/proc/mem.c  struct vm_area_struct *next = vma->vm_next;
vma  42  fs/proc/mem.c  if (vma->vm_end != next->vm_start)
vma  46  fs/proc/mem.c  vma = next;
vma  194  fs/proc/mem.c  struct vm_area_struct * vma)
vma  222  fs/proc/mem.c  stmp = vma->vm_offset;
vma  223  fs/proc/mem.c  while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
vma  257  fs/proc/mem.c  stmp    = vma->vm_offset;
vma  258  fs/proc/mem.c  dtmp    = vma->vm_start;
vma  260  fs/proc/mem.c  flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
vma  262  fs/proc/mem.c  while (dtmp < vma->vm_end) {
vma  281  fs/proc/mem.c  if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
vma  292  fs/proc/mem.c  flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
vma  104  fs/smbfs/mmap.c  smb_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  109  fs/smbfs/mmap.c  if (vma->vm_flags & VM_SHARED)
vma  118  fs/smbfs/mmap.c  vma->vm_inode = inode;
vma  120  fs/smbfs/mmap.c  vma->vm_ops = &smb_file_mmap;
vma  790  fs/super.c  struct vm_area_struct * vma;
vma  796  fs/super.c  vma = find_vma(current, (unsigned long) data);
vma  797  fs/super.c  if (!vma || (unsigned long) data < vma->vm_start)
vma  799  fs/super.c  if (!(vma->vm_flags & VM_READ))
vma  801  fs/super.c  i = vma->vm_end - (unsigned long) data;
vma  19  include/asm-alpha/pgtable.h  #define flush_cache_page(vma, vmaddr)    do { } while (0)
vma  61  include/asm-alpha/pgtable.h  struct vm_area_struct *vma,
vma  65  include/asm-alpha/pgtable.h  tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
vma  67  include/asm-alpha/pgtable.h  if (vma->vm_flags & VM_EXEC)
vma  110  include/asm-alpha/pgtable.h  static inline void flush_tlb_page(struct vm_area_struct *vma,
vma  113  include/asm-alpha/pgtable.h  struct mm_struct * mm = vma->vm_mm;
vma  118  include/asm-alpha/pgtable.h  flush_tlb_current_page(mm, vma, addr);
vma  500  include/asm-alpha/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  26  include/asm-i386/pgtable.h  #define flush_cache_page(vma, vmaddr)    do { } while (0)
vma  63  include/asm-i386/pgtable.h  static inline void flush_tlb_page(struct vm_area_struct *vma,
vma  66  include/asm-i386/pgtable.h  if (vma->vm_mm == current->mm)
vma  124  include/asm-i386/pgtable.h  static inline void flush_tlb_page(struct vm_area_struct * vma,
vma  127  include/asm-i386/pgtable.h  if (vma->vm_mm == current->mm && current->mm->count == 1)
vma  152  include/asm-i386/pgtable.h  static inline void flush_tlb_page(struct vm_area_struct *vma,
vma  475  include/asm-i386/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  40  include/asm-m68k/pgtable.h  static inline void flush_tlb_page(struct vm_area_struct *vma,
vma  43  include/asm-m68k/pgtable.h  if (vma->vm_mm == current->mm)
vma  534  include/asm-m68k/pgtable.h  #define flush_cache_page(vma, addr)     flush_cache_all()
vma  569  include/asm-m68k/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  551  include/asm-mips/pgtable.h  extern void update_mmu_cache(struct vm_area_struct * vma,
vma  483  include/asm-ppc/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  487  include/asm-ppc/pgtable.h  printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
vma  488  include/asm-ppc/pgtable.h  _printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
vma  275  include/asm-sparc/pgtable.h  extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
vma  311  include/asm-sparc/pgtable.h  extern void (*set_pte)(struct vm_area_struct *vma, unsigned long address,
vma  323  include/asm-sparc/pgtable.h  extern void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
vma  262  include/linux/mm.h  extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
vma  267  include/linux/mm.h  extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
vma  268  include/linux/mm.h  extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
vma  269  include/linux/mm.h  extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
vma  316  include/linux/mm.h  static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
vma  321  include/linux/mm.h  if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
vma  323  include/linux/mm.h  grow = vma->vm_start - address;
vma  324  include/linux/mm.h  vma->vm_start = address;
vma  325  include/linux/mm.h  vma->vm_offset -= grow;
vma  326  include/linux/mm.h  vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
vma  327  include/linux/mm.h  if (vma->vm_flags & VM_LOCKED)
vma  328  include/linux/mm.h  vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
vma  360  include/linux/mm.h  struct vm_area_struct * vma;
vma  362  include/linux/mm.h  vma = find_vma(task,start_addr);
vma  363  include/linux/mm.h  if (!vma || end_addr <= vma->vm_start)
vma  365  include/linux/mm.h  return vma;
vma  161  include/linux/ncp_fs.h  int ncp_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
vma  136  include/linux/nfs_fs.h  extern int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
vma  177  include/linux/smb_fs.h  int smb_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
vma  801  mm/filemap.c  static int filemap_write_page(struct vm_area_struct * vma,
vma  821  mm/filemap.c  inode = vma->vm_inode;
vma  849  mm/filemap.c  int filemap_swapout(struct vm_area_struct * vma,
vma  857  mm/filemap.c  flush_cache_page(vma, (offset + vma->vm_start - vma->vm_offset));
vma  859  mm/filemap.c  flush_tlb_page(vma, (offset + vma->vm_start - vma->vm_offset));
vma  860  mm/filemap.c  error = filemap_write_page(vma, offset, page);
vma  872  mm/filemap.c  static pte_t filemap_swapin(struct vm_area_struct * vma,
vma  880  mm/filemap.c  return mk_pte(page,vma->vm_page_prot);
vma  884  mm/filemap.c  static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
vma  897  mm/filemap.c  flush_cache_page(vma, address);
vma  899  mm/filemap.c  flush_tlb_page(vma, address);
vma  905  mm/filemap.c  flush_cache_page(vma, address);
vma  907  mm/filemap.c  flush_tlb_page(vma, address);
vma  918  mm/filemap.c  error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
vma  925  mm/filemap.c  struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
vma  946  mm/filemap.c  error |= filemap_sync_pte(pte, vma, address + offset, flags);
vma  955  mm/filemap.c  struct vm_area_struct *vma, unsigned int flags)
vma  976  mm/filemap.c  error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
vma  983  mm/filemap.c  static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
vma  990  mm/filemap.c  dir = pgd_offset(vma->vm_mm, address);
vma  991  mm/filemap.c  flush_cache_range(vma->vm_mm, end - size, end);
vma  993  mm/filemap.c  error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
vma  997  mm/filemap.c  flush_tlb_range(vma->vm_mm, end - size, end);
vma  1004  mm/filemap.c  static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
vma  1006  mm/filemap.c  filemap_sync(vma, start, len, MS_ASYNC);
vma  1047  mm/filemap.c  int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  1051  mm/filemap.c  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
vma  1055  mm/filemap.c  if (vma->vm_offset & (PAGE_SIZE - 1))
vma  1059  mm/filemap.c  if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
vma  1070  mm/filemap.c  vma->vm_inode = inode;
vma  1072  mm/filemap.c  vma->vm_ops = ops;
vma  1081  mm/filemap.c  static int msync_interval(struct vm_area_struct * vma,
vma  1084  mm/filemap.c  if (!vma->vm_inode)
vma  1086  mm/filemap.c  if (vma->vm_ops->sync) {
vma  1088  mm/filemap.c  error = vma->vm_ops->sync(vma, start, end-start, flags);
vma  1092  mm/filemap.c  return file_fsync(vma->vm_inode, NULL);
vma  1101  mm/filemap.c  struct vm_area_struct * vma;
vma  1118  mm/filemap.c  vma = find_vma(current, start);
vma  1122  mm/filemap.c  if (!vma)
vma  1125  mm/filemap.c  if (start < vma->vm_start) {
vma  1127  mm/filemap.c  start = vma->vm_start;
vma  1130  mm/filemap.c  if (end <= vma->vm_end) {
vma  1132  mm/filemap.c  error = msync_interval(vma, start, end, flags);
vma  1139  mm/filemap.c  error = msync_interval(vma, start, vma->vm_end, flags);
vma  1142  mm/filemap.c  start = vma->vm_end;
vma  1143  mm/filemap.c  vma = vma->vm_next;
vma  277  mm/memory.c  struct vm_area_struct *vma)
vma  280  mm/memory.c  unsigned long address = vma->vm_start;
vma  281  mm/memory.c  unsigned long end = vma->vm_end;
vma  284  mm/memory.c  cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
vma  287  mm/memory.c  flush_cache_range(src, vma->vm_start, vma->vm_end);
vma  288  mm/memory.c  flush_cache_range(dst, vma->vm_start, vma->vm_end);
vma  296  mm/memory.c  flush_tlb_range(src, vma->vm_start, vma->vm_end);
vma  297  mm/memory.c  flush_tlb_range(dst, vma->vm_start, vma->vm_end);
vma  590  mm/memory.c  void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
vma  599  mm/memory.c  page_dir = pgd_offset(vma->vm_mm, address);
vma  625  mm/memory.c  ++vma->vm_mm->rss;
vma  629  mm/memory.c  flush_cache_page(vma, address);
vma  630  mm/memory.c  set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
vma  632  mm/memory.c  flush_tlb_page(vma, address);
vma  635  mm/memory.c  flush_cache_page(vma, address);
vma  637  mm/memory.c  flush_tlb_page(vma, address);
vma  642  mm/memory.c  flush_cache_page(vma, address);
vma  644  mm/memory.c  flush_tlb_page(vma, address);
vma  670  mm/memory.c  struct vm_area_struct * vma;
vma  680  mm/memory.c  vma = find_vma(current, start);
vma  681  mm/memory.c  if (!vma)
vma  683  mm/memory.c  if (vma->vm_start > start)
vma  691  mm/memory.c  if (!(vma->vm_flags & VM_READ))
vma  693  mm/memory.c  if (vma->vm_end - start >= size)
vma  695  mm/memory.c  next = vma->vm_next;
vma  696  mm/memory.c  if (!next || vma->vm_end != next->vm_start)
vma  698  mm/memory.c  vma = next;
vma  702  mm/memory.c  if (!(vma->vm_flags & VM_WRITE))
vma  707  mm/memory.c  if (vma->vm_end - start >= size)
vma  709  mm/memory.c  if (!vma->vm_next || vma->vm_end != vma->vm_next->vm_start)
vma  711  mm/memory.c  vma = vma->vm_next;
vma  712  mm/memory.c  if (!(vma->vm_flags & VM_WRITE))
vma  724  mm/memory.c  do_wp_page(current, vma, start, 1);
vma  729  mm/memory.c  if (start < vma->vm_end)
vma  731  mm/memory.c  vma = vma->vm_next;
vma  732  mm/memory.c  if (!vma || vma->vm_start != start)
vma  734  mm/memory.c  if (!(vma->vm_flags & VM_WRITE))
vma  740  mm/memory.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  742  mm/memory.c  if (expand_stack(vma, start) == 0)
vma  749  mm/memory.c  static inline void get_empty_page(struct task_struct * tsk, struct vm_area_struct * vma,
vma  754  mm/memory.c  pte = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
vma  757  mm/memory.c  pte = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  758  mm/memory.c  vma->vm_mm->rss++;
vma  772  mm/memory.c  static void partial_clear(struct vm_area_struct *vma, unsigned long address)
vma  778  mm/memory.c  page_dir = pgd_offset(vma->vm_mm, address);
vma  798  mm/memory.c  flush_cache_page(vma, address);
vma  872  mm/memory.c  struct vm_area_struct * vma, unsigned long address,
vma  877  mm/memory.c  if (!vma->vm_ops || !vma->vm_ops->swapin) {
vma  878  mm/memory.c  swap_in(tsk, vma, page_table, pte_val(entry), write_access);
vma  882  mm/memory.c  page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
vma  887  mm/memory.c  if (mem_map[MAP_NR(pte_page(page))].count > 1 && !(vma->vm_flags & VM_SHARED))
vma  889  mm/memory.c  ++vma->vm_mm->rss;
vma  902  mm/memory.c  void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
vma  916  mm/memory.c  do_swap_page(tsk, vma, address, page_table, entry, write_access);
vma  920  mm/memory.c  if (!vma->vm_ops || !vma->vm_ops->nopage) {
vma  921  mm/memory.c  flush_cache_page(vma, address);
vma  922  mm/memory.c  get_empty_page(tsk, vma, page_table, write_access);
vma  926  mm/memory.c  ++vma->vm_mm->rss;
vma  932  mm/memory.c  page = vma->vm_ops->nopage(vma, address, write_access && !(vma->vm_flags & VM_SHARED));
vma  935  mm/memory.c  flush_cache_page(vma, address);
vma  937  mm/memory.c  flush_tlb_page(vma, address);
vma  951  mm/memory.c  entry = mk_pte(page, vma->vm_page_prot);
vma  954  mm/memory.c  } else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED))
vma  956  mm/memory.c  flush_cache_page(vma, address);
vma  958  mm/memory.c  flush_tlb_page(vma, address);
vma  974  mm/memory.c  static inline void handle_pte_fault(struct vm_area_struct * vma, unsigned long address,
vma  978  mm/memory.c  do_no_page(current, vma, address, write_access);
vma  982  mm/memory.c  flush_tlb_page(vma, address);
vma  987  mm/memory.c  flush_tlb_page(vma, address);
vma  990  mm/memory.c  do_wp_page(current, vma, address, write_access);
vma  993  mm/memory.c  void handle_mm_fault(struct vm_area_struct * vma, unsigned long address,
vma  1000  mm/memory.c  pgd = pgd_offset(vma->vm_mm, address);
vma  1007  mm/memory.c  handle_pte_fault(vma, address, write_access, pte);
vma  1008  mm/memory.c  update_mmu_cache(vma, address, *pte);
vma  20  mm/mlock.c  static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
vma  22  mm/mlock.c  vma->vm_flags = newflags;
vma  26  mm/mlock.c  static inline int mlock_fixup_start(struct vm_area_struct * vma,
vma  34  mm/mlock.c  *n = *vma;
vma  35  mm/mlock.c  vma->vm_start = end;
vma  37  mm/mlock.c  vma->vm_offset += vma->vm_start - n->vm_start;
vma  47  mm/mlock.c  static inline int mlock_fixup_end(struct vm_area_struct * vma,
vma  55  mm/mlock.c  *n = *vma;
vma  56  mm/mlock.c  vma->vm_end = start;
vma  58  mm/mlock.c  n->vm_offset += n->vm_start - vma->vm_start;
vma  68  mm/mlock.c  static inline int mlock_fixup_middle(struct vm_area_struct * vma,
vma  81  mm/mlock.c  *left = *vma;
vma  82  mm/mlock.c  *right = *vma;
vma  84  mm/mlock.c  vma->vm_start = start;
vma  85  mm/mlock.c  vma->vm_end = end;
vma  87  mm/mlock.c  vma->vm_offset += vma->vm_start - left->vm_start;
vma  89  mm/mlock.c  vma->vm_flags = newflags;
vma  90  mm/mlock.c  if (vma->vm_inode)
vma  91  mm/mlock.c  vma->vm_inode->i_count += 2;
vma  92  mm/mlock.c  if (vma->vm_ops && vma->vm_ops->open) {
vma  93  mm/mlock.c  vma->vm_ops->open(left);
vma  94  mm/mlock.c  vma->vm_ops->open(right);
vma  101  mm/mlock.c  static int mlock_fixup(struct vm_area_struct * vma,
vma  106  mm/mlock.c  if (newflags == vma->vm_flags)
vma  109  mm/mlock.c  if (start == vma->vm_start) {
vma  110  mm/mlock.c  if (end == vma->vm_end)
vma  111  mm/mlock.c  retval = mlock_fixup_all(vma, newflags);
vma  113  mm/mlock.c  retval = mlock_fixup_start(vma, end, newflags);
vma  115  mm/mlock.c  if (end == vma->vm_end)
vma  116  mm/mlock.c  retval = mlock_fixup_end(vma, start, newflags);
vma  118  mm/mlock.c  retval = mlock_fixup_middle(vma, start, end, newflags);
vma  125  mm/mlock.c  vma->vm_mm->locked_vm += pages;
vma  140  mm/mlock.c  struct vm_area_struct * vma, * next;
vma  151  mm/mlock.c  vma = find_vma(current, start);
vma  152  mm/mlock.c  if (!vma || vma->vm_start > start)
vma  160  mm/mlock.c  newflags = vma->vm_flags | VM_LOCKED;
vma  164  mm/mlock.c  if (vma->vm_end >= end) {
vma  165  mm/mlock.c  error = mlock_fixup(vma, nstart, end, newflags);
vma  169  mm/mlock.c  tmp = vma->vm_end;
vma  170  mm/mlock.c  next = vma->vm_next;
vma  171  mm/mlock.c  error = mlock_fixup(vma, nstart, tmp, newflags);
vma  175  mm/mlock.c  vma = next;
vma  176  mm/mlock.c  if (!vma || vma->vm_start != nstart) {
vma  222  mm/mlock.c  struct vm_area_struct * vma;
vma  233  mm/mlock.c  for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
vma  236  mm/mlock.c  newflags = vma->vm_flags | VM_LOCKED;
vma  239  mm/mlock.c  error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
vma  45  mm/mmap.c  struct vm_area_struct * vma;
vma  121  mm/mmap.c  vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
vma  123  mm/mmap.c  if (!vma)
vma  126  mm/mmap.c  vma->vm_mm = current->mm;
vma  127  mm/mmap.c  vma->vm_start = addr;
vma  128  mm/mmap.c  vma->vm_end = addr + len;
vma  129  mm/mmap.c  vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
vma  130  mm/mmap.c  vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
vma  131  mm/mmap.c  vma->vm_flags |= current->mm->def_flags;
vma  135  mm/mmap.c  vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
vma  137  mm/mmap.c  vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
vma  149  mm/mmap.c  vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
vma  152  mm/mmap.c  vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
vma  153  mm/mmap.c  vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
vma  154  mm/mmap.c  vma->vm_ops = NULL;
vma  155  mm/mmap.c  vma->vm_offset = off;
vma  156  mm/mmap.c  vma->vm_inode = NULL;
vma  157  mm/mmap.c  vma->vm_pte = 0;
vma  162  mm/mmap.c  int error = file->f_op->mmap(file->f_inode, file, vma);
vma  165  mm/mmap.c  kfree(vma);
vma  170  mm/mmap.c  flags = vma->vm_flags;
vma  171  mm/mmap.c  insert_vm_struct(current, vma);
vma  172  mm/mmap.c  merge_segments(current, vma->vm_start, vma->vm_end);
vma  481  mm/mmap.c  static void printk_list (struct vm_area_struct * vma)
vma  484  mm/mmap.c  while (vma) {
vma  485  mm/mmap.c  printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
vma  486  mm/mmap.c  vma = vma->vm_next;
vma  487  mm/mmap.c  if (!vma)
vma  749  mm/mmap.c  struct vm_area_struct * vma;
vma  752  mm/mmap.c  for (vma = mm->mmap; vma; vma = vma->vm_next)
vma  753  mm/mmap.c  avl_insert(vma, &mm->mmap_avl);
vma  88  mm/mprotect.c  static inline int mprotect_fixup_all(struct vm_area_struct * vma,
vma  91  mm/mprotect.c  vma->vm_flags = newflags;
vma  92  mm/mprotect.c  vma->vm_page_prot = prot;
vma  96  mm/mprotect.c  static inline int mprotect_fixup_start(struct vm_area_struct * vma,
vma  105  mm/mprotect.c  *n = *vma;
vma  106  mm/mprotect.c  vma->vm_start = end;
vma  108  mm/mprotect.c  vma->vm_offset += vma->vm_start - n->vm_start;
vma  119  mm/mprotect.c  static inline int mprotect_fixup_end(struct vm_area_struct * vma,
vma  128  mm/mprotect.c  *n = *vma;
vma  129  mm/mprotect.c  vma->vm_end = start;
vma  131  mm/mprotect.c  n->vm_offset += n->vm_start - vma->vm_start;
vma  142  mm/mprotect.c  static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
vma  156  mm/mprotect.c  *left = *vma;
vma  157  mm/mprotect.c  *right = *vma;
vma  159  mm/mprotect.c  vma->vm_start = start;
vma  160  mm/mprotect.c  vma->vm_end = end;
vma  162  mm/mprotect.c  vma->vm_offset += vma->vm_start - left->vm_start;
vma  164  mm/mprotect.c  vma->vm_flags = newflags;
vma  165  mm/mprotect.c  vma->vm_page_prot = prot;
vma  166  mm/mprotect.c  if (vma->vm_inode)
vma  167  mm/mprotect.c  vma->vm_inode->i_count += 2;
vma  168  mm/mprotect.c  if (vma->vm_ops && vma->vm_ops->open) {
vma  169  mm/mprotect.c  vma->vm_ops->open(left);
vma  170  mm/mprotect.c  vma->vm_ops->open(right);
vma  177  mm/mprotect.c  static int mprotect_fixup(struct vm_area_struct * vma,
vma  183  mm/mprotect.c  if (newflags == vma->vm_flags)
vma  186  mm/mprotect.c  if (start == vma->vm_start)
vma  187  mm/mprotect.c  if (end == vma->vm_end)
vma  188  mm/mprotect.c  error = mprotect_fixup_all(vma, newflags, newprot);
vma  190  mm/mprotect.c  error = mprotect_fixup_start(vma, end, newflags, newprot);
vma  191  mm/mprotect.c  else if (end == vma->vm_end)
vma  192  mm/mprotect.c  error = mprotect_fixup_end(vma, start, newflags, newprot);
vma  194  mm/mprotect.c  error = mprotect_fixup_middle(vma, start, end, newflags, newprot);
vma  206  mm/mprotect.c  struct vm_area_struct * vma, * next;
vma  219  mm/mprotect.c  vma = find_vma(current, start);
vma  220  mm/mprotect.c  if (!vma || vma->vm_start > start)
vma  228  mm/mprotect.c  newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
vma  234  mm/mprotect.c  if (vma->vm_end >= end) {
vma  235  mm/mprotect.c  error = mprotect_fixup(vma, nstart, end, newflags);
vma  239  mm/mprotect.c  tmp = vma->vm_end;
vma  240  mm/mprotect.c  next = vma->vm_next;
vma  241  mm/mprotect.c  error = mprotect_fixup(vma, nstart, tmp, newflags);
vma  245  mm/mprotect.c  vma = next;
vma  246  mm/mprotect.c  if (!vma || vma->vm_start != nstart) {
vma  127  mm/mremap.c  static inline unsigned long move_vma(struct vm_area_struct * vma,
vma  138  mm/mremap.c  *new_vma = *vma;
vma  141  mm/mremap.c  new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
vma  164  mm/mremap.c  struct vm_area_struct *vma;
vma  185  mm/mremap.c  vma = find_vma(current, addr);
vma  186  mm/mremap.c  if (!vma || vma->vm_start > addr)
vma  189  mm/mremap.c  if (old_len > vma->vm_end - addr)
vma  191  mm/mremap.c  if (vma->vm_flags & VM_LOCKED) {
vma  199  mm/mremap.c  if (old_len == vma->vm_end - addr) {
vma  201  mm/mremap.c  if (vma->vm_next)
vma  202  mm/mremap.c  max_addr = vma->vm_next->vm_start;
vma  206  mm/mremap.c  vma->vm_end = addr + new_len;
vma  208  mm/mremap.c  if (vma->vm_flags & VM_LOCKED)
vma  219  mm/mremap.c  return move_vma(vma, addr, old_len, new_len);
vma  291  mm/page_alloc.c  void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
vma  311  mm/page_alloc.c  vma->vm_mm->rss++;
vma  315  mm/page_alloc.c  set_pte(page_table, mk_pte(page, vma->vm_page_prot));
vma  318  mm/page_alloc.c  set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
vma  163  mm/swapfile.c  static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
vma  189  mm/swapfile.c  set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
vma  190  mm/swapfile.c  ++vma->vm_mm->rss;
vma  195  mm/swapfile.c  static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
vma  216  mm/swapfile.c  if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page))
vma  224  mm/swapfile.c  static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
vma  245  mm/swapfile.c  if (unuse_pmd(vma, pmd, address, end - address, offset, type, page))
vma  253  mm/swapfile.c  static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
vma  258  mm/swapfile.c  if (unuse_pgd(vma, pgdir, start, end - start, type, page))
vma  268  mm/swapfile.c  struct vm_area_struct* vma;
vma  275  mm/swapfile.c  vma = mm->mmap;
vma  276  mm/swapfile.c  while (vma) {
vma  277  mm/swapfile.c  pgd_t * pgd = pgd_offset(mm, vma->vm_start);
vma  278  mm/swapfile.c  if (unuse_vma(vma, pgd, vma->vm_start, vma->vm_end, type, page))
vma  280  mm/swapfile.c  vma = vma->vm_next;
vma  70  mm/vmscan.c  static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struct* vma,
vma  103  mm/vmscan.c  if (vma->vm_ops && vma->vm_ops->swapout) {
vma  105  mm/vmscan.c  vma->vm_mm->rss--;
vma  106  mm/vmscan.c  if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table))
vma  113  mm/vmscan.c  vma->vm_mm->rss--;
vma  114  mm/vmscan.c  flush_cache_page(vma, address);
vma  116  mm/vmscan.c  flush_tlb_page(vma, address);
vma  129  mm/vmscan.c  vma->vm_mm->rss--;
vma  130  mm/vmscan.c  flush_cache_page(vma, address);
vma  132  mm/vmscan.c  flush_tlb_page(vma, address);
vma  136  mm/vmscan.c  vma->vm_mm->rss--;
vma  137  mm/vmscan.c  flush_cache_page(vma, address);
vma  139  mm/vmscan.c  flush_tlb_page(vma, address);
vma  159  mm/vmscan.c  static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct * vma,
vma  182  mm/vmscan.c  result = try_to_swap_out(tsk, vma, address, pte, dma, wait);
vma  191  mm/vmscan.c  static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct * vma,
vma  212  mm/vmscan.c  int result = swap_out_pmd(tsk, vma, pmd, address, end, dma, wait);
vma  221  mm/vmscan.c  static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
vma  228  mm/vmscan.c  if (vma->vm_flags & (VM_SHM | VM_LOCKED))
vma  231  mm/vmscan.c  end = vma->vm_end;
vma  233  mm/vmscan.c  int result = swap_out_pgd(tsk, vma, pgdir, start, end, dma, wait);
vma  245  mm/vmscan.c  struct vm_area_struct* vma;
vma  256  mm/vmscan.c  vma = find_vma(p, address);
vma  257  mm/vmscan.c  if (!vma)
vma  259  mm/vmscan.c  if (address < vma->vm_start)
vma  260  mm/vmscan.c  address = vma->vm_start;
vma  263  mm/vmscan.c  int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, dma, wait);
vma  266  mm/vmscan.c  vma = vma->vm_next;
vma  267  mm/vmscan.c  if (!vma)
vma  269  mm/vmscan.c  address = vma->vm_start;